bitkeeper revision 1.749.1.2 (403e097cnc0BYoVqLwFH7-TpqyBF_w)
authorkaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Thu, 26 Feb 2004 14:58:04 +0000 (14:58 +0000)
committerkaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Thu, 26 Feb 2004 14:58:04 +0000 (14:58 +0000)
xc_evtchn.c:
  new file
event_channel.h, event_channel.c, Xc.c, xc_private.h, xc.h:
  Plumb event channels through to Python wrapper.

.rootkeys
tools/xc/lib/xc.h
tools/xc/lib/xc_evtchn.c [new file with mode: 0644]
tools/xc/lib/xc_private.h
tools/xc/py/Xc.c
xen/common/event_channel.c
xen/include/hypervisor-ifs/event_channel.h

index 9e80ed605d2ef13b9d82e303bfe43d6b6ed1bcb1..77c43a761858526cd29cfaf3f6d65b9720d69052 100644 (file)
--- a/.rootkeys
+++ b/.rootkeys
@@ -74,6 +74,7 @@
 3fbba6dbEVkVMX0JuDFzap9jeaucGA tools/xc/lib/xc_bvtsched.c
 3fbba6dbasJQV-MVElDC0DGSHMiL5w tools/xc/lib/xc_domain.c
 40278d99BLsfUv3qxv0I8C1sClZ0ow tools/xc/lib/xc_elf.h
+403e0977Bjsm_e82pwvl9VvaJxh8Gg tools/xc/lib/xc_evtchn.c
 3fbba6dbNCU7U6nsMYiXzKkp3ztaJg tools/xc/lib/xc_linux_build.c
 3fbba6dbl267zZOAVHYLOdLCdhcZMw tools/xc/lib/xc_linux_restore.c
 3fbba6db7li3FJiABYtCmuGxOJxEGw tools/xc/lib/xc_linux_save.c
index 74a4fca2ed2f2a002e167969119c476e8564b5c3..2200177932e9111663b5a04e4f277dd58653a81d 100644 (file)
@@ -156,6 +156,27 @@ int xc_vbd_probe(int xc_handle,
                  unsigned int max_vbds,
                  xc_vbd_t *vbds);
 
+#define DOMID_SELF              (~1ULL)
+#define EVTCHNSTAT_closed       0  /* Channel is not in use.              */
+#define EVTCHNSTAT_disconnected 1  /* Channel is not connected to remote. */
+#define EVTCHNSTAT_connected    2  /* Channel is connected to remote.     */
+int xc_evtchn_open(int xc_handle,
+                   u64 dom1,   /* may be DOMID_SELF */
+                   u64 dom2,
+                   int *port1,
+                   int *port2);
+int xc_evtchn_close(int xc_handle,
+                    u64 dom,   /* may be DOMID_SELF */
+                    int port);
+int xc_evtchn_send(int xc_handle,
+                   int local_port);
+int xc_evtchn_status(int xc_handle,
+                     u64 dom1, /* may be DOMID_SELF */
+                     int port1,
+                     u64 *dom2,
+                     int *port2,
+                     int *chn_status);
+
 int xc_readconsolering(int xc_handle,
                        char *str, 
                        unsigned int max_chars, 
diff --git a/tools/xc/lib/xc_evtchn.c b/tools/xc/lib/xc_evtchn.c
new file mode 100644 (file)
index 0000000..b3e17f8
--- /dev/null
@@ -0,0 +1,104 @@
+/******************************************************************************
+ * xc_evtchn.c
+ * 
+ * API for manipulating and accessing inter-domain event channels.
+ * 
+ * Copyright (c) 2004, K A Fraser.
+ */
+
+#include "xc_private.h"
+
+static int do_evtchn_op(int xc_handle, evtchn_op_t *op)
+{
+    int ret = -1;
+    privcmd_hypercall_t hypercall;
+
+    hypercall.op     = __HYPERVISOR_event_channel_op;
+    hypercall.arg[0] = (unsigned long)op;
+
+    if ( mlock(op, sizeof(*op)) != 0 )
+    {
+        PERROR("Could not lock memory for Xen hypercall");
+        goto out1;
+    }
+
+    if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 )
+        goto out2;
+
+ out2: (void)munlock(op, sizeof(*op));
+ out1: return ret;
+}
+
+int xc_evtchn_open(int xc_handle,
+                   u64 dom1,
+                   u64 dom2,
+                   int *port1,
+                   int *port2)
+{
+    evtchn_op_t op;
+    int         rc;
+
+    op.cmd = EVTCHNOP_open;
+    op.u.open.dom1 = (domid_t)dom1;
+    op.u.open.dom2 = (domid_t)dom2;
+   
+    if ( (rc = do_evtchn_op(xc_handle, &op)) == 0 )
+    {
+        if ( port1 != NULL )
+            *port1 = op.u.open.port1;
+        if ( port2 != NULL )
+            *port2 = op.u.open.port2;
+    }
+    
+    return rc;
+}
+
+
+int xc_evtchn_close(int xc_handle,
+                    u64 dom,
+                    int port)
+{
+    evtchn_op_t op;
+    op.cmd = EVTCHNOP_close;
+    op.u.close.dom  = (domid_t)dom;
+    op.u.close.port = port;
+    return do_evtchn_op(xc_handle, &op);
+}
+
+
+int xc_evtchn_send(int xc_handle,
+                   int local_port)
+{
+    evtchn_op_t op;
+    op.cmd = EVTCHNOP_send;
+    op.u.send.local_port = local_port;
+    return do_evtchn_op(xc_handle, &op);
+}
+
+
+int xc_evtchn_status(int xc_handle,
+                     u64 dom1,
+                     int port1,
+                     u64 *dom2,
+                     int *port2,
+                     int *chn_status)
+{
+    evtchn_op_t op;
+    int         rc;
+
+    op.cmd = EVTCHNOP_status;
+    op.u.status.dom1  = (domid_t)dom1;
+    op.u.status.port1 = port1;
+   
+    if ( (rc = do_evtchn_op(xc_handle, &op)) == 0 )
+    {
+        if ( dom2 != NULL )
+            *dom2 = (u64)op.u.status.dom2;
+        if ( port2 != NULL )
+            *port2 = op.u.status.port2;
+        if ( chn_status != NULL )
+            *chn_status = op.u.status.status;
+    }
+    
+    return rc;
+}
index f5e2c25247d1c27bda6714ffc0ea66b7e769cf90..2b5d5604cdbce5cdf694a4234dac2df488a427d6 100644 (file)
@@ -20,6 +20,7 @@
 #include <hypervisor-ifs/hypervisor-if.h>
 #include <hypervisor-ifs/dom0_ops.h>
 #include <hypervisor-ifs/vbd.h>
+#include <hypervisor-ifs/event_channel.h>
 
 #define _PAGE_PRESENT   0x001
 #define _PAGE_RW        0x002
index 2376be5eae639a1a7197d8b059c0dc223d2c75f8..fe29da1cde19cdbe276856ce76f10832df88d318 100644 (file)
@@ -765,6 +765,133 @@ static PyObject *pyxc_vbd_probe(PyObject *self,
     return list;
 }
 
+static PyObject *pyxc_evtchn_open(PyObject *self,
+                                  PyObject *args,
+                                  PyObject *kwds)
+{
+    XcObject *xc = (XcObject *)self;
+    PyObject *dict;
+
+    u64 dom1 = DOMID_SELF, dom2;
+    int port1, port2, ret;
+
+    static char *kwd_list[] = { "dom2", "dom1", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "L|L", kwd_list, 
+                                      &dom2, &dom1) )
+    {
+        DPRINTF("could not parse parameter list.");
+        return NULL;
+    }
+
+    ret = xc_evtchn_open(xc->xc_handle, dom1, dom2, &port1, &port2);
+
+    if ( ret < 0 )
+        dict = Py_BuildValue("{}");
+    else
+        dict = Py_BuildValue("{s:i,s:i}", 
+                             "port1", port1,
+                             "port2", port2);
+    
+    return dict;
+}
+
+static PyObject *pyxc_evtchn_close(PyObject *self,
+                                   PyObject *args,
+                                   PyObject *kwds)
+{
+    XcObject *xc = (XcObject *)self;
+
+    u64 dom = DOMID_SELF;
+    int port, ret;
+
+    static char *kwd_list[] = { "port", "dom", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|L", kwd_list, 
+                                      &port, &dom) )
+    {
+        DPRINTF("could not parse parameter list.");
+        return NULL;
+    }
+
+    ret = xc_evtchn_close(xc->xc_handle, dom, port);
+
+    return PyInt_FromLong(ret);
+}
+
+static PyObject *pyxc_evtchn_send(PyObject *self,
+                                  PyObject *args,
+                                  PyObject *kwds)
+{
+    XcObject *xc = (XcObject *)self;
+
+    int port, ret;
+
+    static char *kwd_list[] = { "port", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i", kwd_list, &port) )
+    {
+        DPRINTF("could not parse parameter list.");
+        return NULL;
+    }
+
+    ret = xc_evtchn_send(xc->xc_handle, port);
+
+    return PyInt_FromLong(ret);
+}
+
+static PyObject *pyxc_evtchn_status(PyObject *self,
+                                    PyObject *args,
+                                    PyObject *kwds)
+{
+    XcObject *xc = (XcObject *)self;
+    PyObject *dict;
+
+    u64 dom1 = DOMID_SELF, dom2;
+    int port1, port2, status, ret;
+
+    static char *kwd_list[] = { "port", "dom", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|L", kwd_list, 
+                                      &port1, &dom1) )
+    {
+        DPRINTF("could not parse parameter list.");
+        return NULL;
+    }
+
+    ret = xc_evtchn_status(xc->xc_handle, dom1, port1, &dom2, &port2, &status);
+
+    if ( ret < 0 )
+    {
+        dict = Py_BuildValue("{}");
+    }
+    else
+    {
+        switch ( status )
+        {
+        case EVTCHNSTAT_closed:
+            dict = Py_BuildValue("{s:s}", 
+                                 "status", "closed");
+            break;
+        case EVTCHNSTAT_disconnected:
+            dict = Py_BuildValue("{s:s}", 
+                                 "status", "disconnected");
+            break;
+        case EVTCHNSTAT_connected:
+            dict = Py_BuildValue("{s:s,s:L,s:i}", 
+                                 "status", "connected",
+                                 "dom", dom2,
+                                 "port", port2);
+            break;
+        default:
+            dict = Py_BuildValue("{}");
+            break;
+        }
+    }
+    
+    return dict;
+}
+
 static PyObject *pyxc_readconsolering(PyObject *self,
                                       PyObject *args,
                                       PyObject *kwds)
@@ -1031,6 +1158,43 @@ static PyMethodDef pyxc_methods[] = {
       " writeable  [int]:  Bool - is this VBD writeable?\n"
       " nr_sectors [long]: Size of this VBD, in 512-byte sectors.\n" },
 
+    { "evtchn_open", 
+      (PyCFunction)pyxc_evtchn_open, 
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Open an event channel between two domains.\n"
+      " dom1 [long, SELF]: First domain to be connected.\n"
+      " dom2 [long]:       Second domain to be connected.\n\n"
+      "Returns: [dict] dictionary is empty on failure.\n"
+      " port1 [int]: Port-id for endpoint at dom1.\n"
+      " port2 [int]: Port-id for endpoint at dom2.\n" },
+
+    { "evtchn_close", 
+      (PyCFunction)pyxc_evtchn_close, 
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Close an event channel.\n"
+      " dom  [long, SELF]: Dom-id of one endpoint of the channel.\n"
+      " port [int]:        Port-id of one endpoint of the channel.\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "evtchn_send", 
+      (PyCFunction)pyxc_evtchn_send, 
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Send an event along a locally-connected event channel.\n"
+      " port [int]: Port-id of a local channel endpoint.\n\n"
+      "Returns: [int] 0 on success; -1 on error.\n" },
+
+    { "evtchn_status", 
+      (PyCFunction)pyxc_evtchn_status, 
+      METH_VARARGS | METH_KEYWORDS, "\n"
+      "Query the status of an event channel.\n"
+      " dom  [long, SELF]: Dom-id of one endpoint of the channel.\n"
+      " port [int]:        Port-id of one endpoint of the channel.\n\n"
+      "Returns: [dict] dictionary is empty on failure.\n"
+      " status [str]:  'closed', 'disconnected', or 'connected'.\n"
+      "The following are also returned if 'status' is 'connected':\n"
+      " dom  [long]: Dom-id of the remote endpoint.\n"
+      " port [int]:  Port-id of the remote endpoint.\n" },
+
     { "readconsolering", 
       (PyCFunction)pyxc_readconsolering, 
       METH_VARARGS | METH_KEYWORDS, "\n"
index ad668e8684ef2cbf022a7c9f81e0890af65601d3..7b0291d577eebe851567e3be76dafa43f00ec945 100644 (file)
@@ -83,156 +83,158 @@ static inline unsigned long set_event_disc(struct task_struct *p, int port)
 
 static long event_channel_open(evtchn_open_t *open)
 {
-    struct task_struct *lp, *rp;
-    int                 lport = 0, rport = 0;
+    struct task_struct *p1, *p2;
+    int                 port1 = 0, port2 = 0;
     unsigned long       cpu_mask;
-    domid_t             ldom = open->local_dom, rdom = open->remote_dom;
+    domid_t             dom1 = open->dom1, dom2 = open->dom2;
     long                rc = 0;
 
     if ( !IS_PRIV(current) )
         return -EPERM;
 
-    /* 'local_dom' may be DOMID_SELF. 'remote_dom' cannot be.*/
-    if ( ldom == DOMID_SELF )
-        ldom = current->domain;
+    /* 'dom1' may be DOMID_SELF. 'dom2' cannot be. */
+    if ( dom1 == DOMID_SELF )
+        dom1 = current->domain;
+    if ( dom2 == DOMID_SELF )
+        return -EINVAL;
 
     /* Event channel must connect distinct domains. */
-    if ( ldom == rdom )
+    if ( dom1 == dom2 )
         return -EINVAL;
 
-    if ( ((lp = find_domain_by_id(ldom)) == NULL) ||
-         ((rp = find_domain_by_id(rdom)) == NULL) )
+    if ( ((p1 = find_domain_by_id(dom1)) == NULL) ||
+         ((p2 = find_domain_by_id(dom2)) == NULL) )
     {
-        if ( lp != NULL )
-            put_task_struct(lp);
+        if ( p1 != NULL )
+            put_task_struct(p1);
         return -ESRCH;
     }
 
     /* Avoid deadlock by first acquiring lock of domain with smaller id. */
-    if ( ldom < rdom )
+    if ( dom1 < dom2 )
     {
-        spin_lock(&lp->event_channel_lock);
-        spin_lock(&rp->event_channel_lock);
+        spin_lock(&p1->event_channel_lock);
+        spin_lock(&p2->event_channel_lock);
     }
     else
     {
-        spin_lock(&rp->event_channel_lock);
-        spin_lock(&lp->event_channel_lock);
+        spin_lock(&p2->event_channel_lock);
+        spin_lock(&p1->event_channel_lock);
     }
 
-    if ( (lport = get_free_port(lp)) < 0 )
+    if ( (port1 = get_free_port(p1)) < 0 )
     {
-        rc = lport;
+        rc = port1;
         goto out;
     }
 
-    if ( (rport = get_free_port(rp)) < 0 )
+    if ( (port2 = get_free_port(p2)) < 0 )
     {
-        rc = rport;
+        rc = port2;
         goto out;
     }
 
-    lp->event_channel[lport].remote_dom  = rp;
-    lp->event_channel[lport].remote_port = (u16)rport;
-    lp->event_channel[lport].state       = ECS_CONNECTED;
+    p1->event_channel[port1].remote_dom  = p2;
+    p1->event_channel[port1].remote_port = (u16)port2;
+    p1->event_channel[port1].state       = ECS_CONNECTED;
 
-    rp->event_channel[rport].remote_dom  = lp;
-    rp->event_channel[rport].remote_port = (u16)lport;
-    rp->event_channel[rport].state       = ECS_CONNECTED;
+    p2->event_channel[port2].remote_dom  = p1;
+    p2->event_channel[port2].remote_port = (u16)port1;
+    p2->event_channel[port2].state       = ECS_CONNECTED;
 
-    cpu_mask  = set_event_pending(lp, lport);
-    cpu_mask |= set_event_pending(rp, rport);
+    cpu_mask  = set_event_pending(p1, port1);
+    cpu_mask |= set_event_pending(p2, port2);
     guest_event_notify(cpu_mask);
     
  out:
-    spin_unlock(&lp->event_channel_lock);
-    spin_unlock(&rp->event_channel_lock);
+    spin_unlock(&p1->event_channel_lock);
+    spin_unlock(&p2->event_channel_lock);
     
-    put_task_struct(lp);
-    put_task_struct(rp);
+    put_task_struct(p1);
+    put_task_struct(p2);
 
-    open->local_port  = lport;
-    open->remote_port = rport;
+    open->port1 = port1;
+    open->port2 = port2;
 
     return rc;
 }
 
 
-static long __event_channel_close(struct task_struct *lp, int lport)
+static long __event_channel_close(struct task_struct *p1, int port1)
 {
-    struct task_struct *rp = NULL;
-    event_channel_t    *lchn, *rchn;
-    int                 rport;
+    struct task_struct *p2 = NULL;
+    event_channel_t    *chn1, *chn2;
+    int                 port2;
     unsigned long       cpu_mask;
     long                rc = 0;
 
  again:
-    spin_lock(&lp->event_channel_lock);
+    spin_lock(&p1->event_channel_lock);
 
-    lchn = lp->event_channel;
+    chn1 = p1->event_channel;
 
-    if ( (lport < 0) || (lport >= lp->max_event_channel) || 
-         (lchn[lport].state == ECS_FREE) )
+    if ( (port1 < 0) || (port1 >= p1->max_event_channel) || 
+         (chn1[port1].state == ECS_FREE) )
     {
         rc = -EINVAL;
         goto out;
     }
 
-    if ( lchn[lport].state == ECS_CONNECTED )
+    if ( chn1[port1].state == ECS_CONNECTED )
     {
-        if ( rp == NULL )
+        if ( p2 == NULL )
         {
-            rp = lchn[lport].remote_dom;
-            get_task_struct(rp);
+            p2 = chn1[port1].remote_dom;
+            get_task_struct(p2);
 
-            if ( lp->domain < rp->domain )
+            if ( p1->domain < p2->domain )
             {
-                spin_lock(&rp->event_channel_lock);
+                spin_lock(&p2->event_channel_lock);
             }
             else
             {
-                spin_unlock(&lp->event_channel_lock);
-                spin_lock(&rp->event_channel_lock);
+                spin_unlock(&p1->event_channel_lock);
+                spin_lock(&p2->event_channel_lock);
                 goto again;
             }
         }
-        else if ( rp != lchn[lport].remote_dom )
+        else if ( p2 != chn1[port1].remote_dom )
         {
             rc = -EINVAL;
             goto out;
         }
         
-        rchn  = rp->event_channel;
-        rport = lchn[lport].remote_port;
+        chn2  = p2->event_channel;
+        port2 = chn1[port1].remote_port;
 
-        if ( rport >= rp->max_event_channel )
+        if ( port2 >= p2->max_event_channel )
             BUG();
-        if ( rchn[rport].state != ECS_CONNECTED )
+        if ( chn2[port2].state != ECS_CONNECTED )
             BUG();
-        if ( rchn[rport].remote_dom != lp )
+        if ( chn2[port2].remote_dom != p1 )
             BUG();
 
-        rchn[rport].state       = ECS_ZOMBIE;
-        rchn[rport].remote_dom  = NULL;
-        rchn[rport].remote_port = 0xFFFF;
+        chn2[port2].state       = ECS_ZOMBIE;
+        chn2[port2].remote_dom  = NULL;
+        chn2[port2].remote_port = 0xFFFF;
 
-        cpu_mask  = set_event_disc(lp, lport);
-        cpu_mask |= set_event_disc(rp, rport);
+        cpu_mask  = set_event_disc(p1, port1);
+        cpu_mask |= set_event_disc(p2, port2);
         guest_event_notify(cpu_mask);
     }
 
-    lchn[lport].state       = ECS_FREE;
-    lchn[lport].remote_dom  = NULL;
-    lchn[lport].remote_port = 0xFFFF;
+    chn1[port1].state       = ECS_FREE;
+    chn1[port1].remote_dom  = NULL;
+    chn1[port1].remote_port = 0xFFFF;
     
  out:
-    spin_unlock(&lp->event_channel_lock);
-    put_task_struct(lp);
+    spin_unlock(&p1->event_channel_lock);
+    put_task_struct(p1);
 
-    if ( rp != NULL )
+    if ( p2 != NULL )
     {
-        spin_unlock(&rp->event_channel_lock);
-        put_task_struct(rp);
+        spin_unlock(&p2->event_channel_lock);
+        put_task_struct(p2);
     }
     
     return rc;
@@ -241,22 +243,21 @@ static long __event_channel_close(struct task_struct *lp, int lport)
 
 static long event_channel_close(evtchn_close_t *close)
 {
-    struct task_struct *lp;
-    int                 lport = close->local_port;
+    struct task_struct *p;
     long                rc;
-    domid_t             ldom = close->local_dom;
+    domid_t             dom = close->dom;
 
-    if ( ldom == DOMID_SELF )
-        ldom = current->domain;
+    if ( dom == DOMID_SELF )
+        dom = current->domain;
     else if ( !IS_PRIV(current) )
         return -EPERM;
 
-    if ( (lp = find_domain_by_id(ldom)) == NULL )
+    if ( (p = find_domain_by_id(dom)) == NULL )
         return -ESRCH;
 
-    rc = __event_channel_close(lp, lport);
+    rc = __event_channel_close(p, close->port);
 
-    put_task_struct(lp);
+    put_task_struct(p);
     return rc;
 }
 
@@ -295,30 +296,30 @@ static long event_channel_send(int lport)
 
 static long event_channel_status(evtchn_status_t *status)
 {
-    struct task_struct *lp;
-    domid_t             ldom = status->local_dom;
-    int                 lport = status->local_port;
-    event_channel_t    *lchn;
+    struct task_struct *p;
+    domid_t             dom = status->dom1;
+    int                 port = status->port1;
+    event_channel_t    *chn;
 
-    if ( ldom == DOMID_SELF )
-        ldom = current->domain;
+    if ( dom == DOMID_SELF )
+        dom = current->domain;
     else if ( !IS_PRIV(current) )
         return -EPERM;
 
-    if ( (lp = find_domain_by_id(ldom)) == NULL )
+    if ( (p = find_domain_by_id(dom)) == NULL )
         return -ESRCH;
 
-    spin_lock(&lp->event_channel_lock);
+    spin_lock(&p->event_channel_lock);
 
-    lchn = lp->event_channel;
+    chn = p->event_channel;
 
-    if ( (lport < 0) || (lport >= lp->max_event_channel) )
+    if ( (port < 0) || (port >= p->max_event_channel) )
     {
-        spin_unlock(&lp->event_channel_lock);
+        spin_unlock(&p->event_channel_lock);
         return -EINVAL;
     }
 
-    switch ( lchn[lport].state )
+    switch ( chn[port].state )
     {
     case ECS_FREE:
         status->status = EVTCHNSTAT_closed;
@@ -328,14 +329,14 @@ static long event_channel_status(evtchn_status_t *status)
         break;
     case ECS_CONNECTED:
         status->status = EVTCHNSTAT_connected;
-        status->remote_dom  = lchn[lport].remote_dom->domain;
-        status->remote_port = lchn[lport].remote_port;
+        status->dom2   = chn[port].remote_dom->domain;
+        status->port2  = chn[port].remote_port;
         break;
     default:
         BUG();
     }
 
-    spin_unlock(&lp->event_channel_lock);
+    spin_unlock(&p->event_channel_lock);
     return 0;
 }
 
index e7c3aa7e1fc0af45a8ca8202874fd64ffe5df70c..573953d24600e29a8946a1c0df687f6a5514d1e6 100644 (file)
 #define __HYPERVISOR_IFS__EVENT_CHANNEL_H__
 
 /*
- * EVTCHNOP_open: Open a communication channel between <local_dom> and
- * <remote_dom>.
+ * EVTCHNOP_open: Open a communication channel between <dom1> and <dom2>.
  * NOTES:
- *  1. <local_dom> may be specified as DOMID_SELF.
+ *  1. <dom1> may be specified as DOMID_SELF.
  *  2. Only a sufficiently-privileged domain may create an event channel.
- *  3. <local_port> and <remote_port> are only supplied if the op succeeds.
+ *  3. <port1> and <port2> are only supplied if the op succeeds.
  */
 #define EVTCHNOP_open           0
 typedef struct evtchn_open
 {
     /* IN parameters. */
-    domid_t local_dom, remote_dom;
+    domid_t dom1, dom2;
     /* OUT parameters. */
-    int     local_port, remote_port;
+    int     port1, port2;
 } evtchn_open_t;
 
 /*
  * EVTCHNOP_close: Close the communication channel which has an endpoint at
- * <local_dom, local_port>.
+ * <dom, port>.
  * NOTES:
- *  1. <local_dom> may be specified as DOMID_SELF.
+ *  1. <dom> may be specified as DOMID_SELF.
  *  2. Only a sufficiently-privileged domain may close an event channel
- *     for which <local_dom> is not DOMID_SELF.
+ *     for which <dom> is not DOMID_SELF.
  */
 #define EVTCHNOP_close          1
 typedef struct evtchn_close
 {
     /* IN parameters. */
-    domid_t local_dom;
-    int     local_port;
+    domid_t dom;
+    int     port;
     /* No OUT parameters. */
 } evtchn_close_t;
 
@@ -57,22 +56,22 @@ typedef struct evtchn_send
 
 /*
  * EVTCHNOP_status: Get the current status of the communication channel which
- * has an endpoint at <local_dom, local_port>.
+ * has an endpoint at <dom1, port1>.
  * NOTES:
- *  1. <local_dom> may be specified as DOMID_SELF.
+ *  1. <dom1> may be specified as DOMID_SELF.
  *  2. Only a sufficiently-privileged domain may obtain the status of an event
- *     channel for which <local_dom> is not DOMID_SELF.
- *  3. <remote_dom, remote_port> is only supplied if status is 'connected'.
+ *     channel for which <dom1> is not DOMID_SELF.
+ *  3. <dom2, port2> is only supplied if status is 'connected'.
  */
 #define EVTCHNOP_status         3  /* Get status of <channel id>.         */
 typedef struct evtchn_status
 {
     /* IN parameters */
-    domid_t local_dom;
-    int     local_port;
+    domid_t dom1;
+    int     port1;
     /* OUT parameters */
-    domid_t remote_dom;
-    int     remote_port;
+    domid_t dom2;
+    int     port2;
#define EVTCHNSTAT_closed       0  /* Channel is not in use.              */
 #define EVTCHNSTAT_disconnected 1  /* Channel is not connected to remote. */
 #define EVTCHNSTAT_connected    2  /* Channel is connected to remote.     */